@techreport{TD:101202,
	att_abstract={{The goal of simultaneous speech-to-speech (S2S) translation is to translate source language speech into target language with low latency. While conventional speech-to-speech (S2S) translation systems typically ignore the source language acoustic-prosodic information such as pausing, exploiting such information for simultaneous S2S translation can potentially aid in the chunking of source text into short phrases that can be subsequently translated incrementally with low latency. Such an approach is often used by human interpreters in simultaneous interpretation. In this work we investigate the phenomena of pausing in simultaneous interpretation and study the impact of utilizing such information for target language text-to-speech synthesis in a simultaneous S2S system. On one hand, we superimpose the source language pause information obtained through 
forced alignment (or decoding) in an isomorphic manner on the target side while on the other hand, we use a classifier to predict the pause information for the target text by exploiting features from the target language, source language or both. We contrast our approach with the baseline that does not use any pauses. We perform our investigation on a simultaneous interpretation corpus of Parliamentary speeches and present subjective evaluation results based on the quality of synthesized target speech.}},
	att_authors={vk947h, jc582p, sb7658, ac1234},
	att_categories={C_BB.2},
	att_copyright={{}},
	att_copyright_notice={{}},
	att_donotupload={},
	att_private={false},
	att_projects={Speech_Translation},
	att_tags={},
	att_techdoc={true},
	att_techdoc_key={TD:101202},
	att_url={http://web1.research.att.com:81/techdocs_downloads/TD:101202_DS1_2013-06-04T01:52:25.712Z.pdf},
	author={Rangarajan Sridhar, Vivek Kumar and Chen, John and Bangalore, Srinivas and Conkie, Alistair},
	institution={{Speech Synthesis Workshop}},
	month=aug,
	title={Role of Pausing in {Text-to-Speech} Synthesis for Simultaneous Interpretation},
	year=2013,
}