@techreport{TD:101167,
	att_abstract={{It is now commonplace to use web conferencing technology to hold meetings between participants in different physical locations. A drawback of this technology is that nearly all interaction between these participants is monolingual. Here, we demonstrate a novel form of this technology that enables cross-lingual speech-to-speech communication between conference participants in real time. We model the translation problem as a combination of incremental speech recognition and segmentation, addressing the question of which segmentation strategy maximizes translation accuracy while minimizing latency. Our demonstration takes the form of a web conferencing scenario in which a lecturer speaks in one language while participants listen to or read translations of the lecturer's speech in real time. The system is flexible enough to allow real-time translation of technical talks as well as speeches covering broad topics.}},
	att_authors={jc582p, sw275u, vk947h, sb7658},
	att_private={false},
	att_techdoc={true},
	att_techdoc_key={TD:101167},
	att_url={http://web1.research.att.com:81/techdocs_downloads/TD:101167_DS1_2013-04-18T14:12:18.503Z.pdf},
	author={John Chen and Shufei Wen and Vivek Kumar Rangarajan Sridhar and Srinivas Bangalore},
	institution={{Interspeech 2013}},
	month={August},
	title={{Multilingual Web Conferencing Using Speech-to-Speech Translation}},
	year=2013,
}